static void __netif_up(netif_t *netif)
{
- struct net_device *dev = netif->dev;
- netif_tx_lock_bh(dev);
- netif->active = 1;
- netif_tx_unlock_bh(dev);
enable_irq(netif->irq);
netif_schedule_work(netif);
}
static void __netif_down(netif_t *netif)
{
- struct net_device *dev = netif->dev;
disable_irq(netif->irq);
- netif_tx_lock_bh(dev);
- netif->active = 0;
- netif_tx_unlock_bh(dev);
netif_deschedule_work(netif);
}
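
The two helpers above bracket IRQ enable/disable; the deleted lines show that the driver-private "active" flag, previously guarded by the tx lock, is replaced by the device's carrier state. A minimal sketch of the substitution (example_set_link() is illustrative, not part of the patch):

#include <linux/netdevice.h>

/* Sketch, not from the patch: the carrier bit now publishes what the
 * removed "active" flag did, so readers can test netif_carrier_ok()
 * without taking the tx lock. */
static void example_set_link(struct net_device *dev, int up)
{
	if (up)
		netif_carrier_on(dev);	/* was: netif->active = 1 */
	else
		netif_carrier_off(dev);	/* was: netif->active = 0 */
}
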
static int net_open(struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
- if (netif->status == CONNECTED)
+ if (netif_carrier_ok(dev))
__netif_up(netif);
netif_start_queue(dev);
return 0;
}
static int net_close(struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
netif_stop_queue(dev);
- if (netif->status == CONNECTED)
+ if (netif_carrier_ok(dev))
__netif_down(netif);
return 0;
}
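
net_open() and net_close() now touch the data path only when the carrier test says the backend is connected. In the pre-net_device_ops kernels this driver targets, such handlers are wired straight into struct net_device; a hedged sketch of that hookup (example_wire_handlers() is an assumption, not shown in the patch):

/* Sketch, assuming the 2.6.x-era net_device layout, where open/stop
 * are plain function pointers on the device. */
static void example_wire_handlers(struct net_device *dev)
{
	dev->open = net_open;
	dev->stop = net_close;
}
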
return ERR_PTR(-ENOMEM);
}
+ netif_carrier_off(dev);
+
netif = netdev_priv(dev);
memset(netif, 0, sizeof(*netif));
netif->domid = domid;
netif->handle = handle;
- netif->status = DISCONNECTED;
atomic_set(&netif->refcnt, 1);
init_waitqueue_head(&netif->waiting_to_free);
netif->dev = dev;
netif->rx_req_cons_peek = 0;
netif_get(netif);
- wmb(); /* Other CPUs see new state before interface is started. */
rtnl_lock();
- netif->status = CONNECTED;
- wmb();
+ netif_carrier_on(netif->dev);
if (netif_running(netif->dev))
__netif_up(netif);
rtnl_unlock();
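
The connect path can drop the explicit wmb() pair: netif_carrier_on() and the open/close handlers all run under rtnl_lock(), so the lock already orders the carrier change against __netif_up(). A condensed sketch of the same bring-up sequence (example_connect() is illustrative; netif_wake_queue() stands in for __netif_up()):

static void example_connect(struct net_device *dev)
{
	rtnl_lock();
	netif_carrier_on(dev);		/* publish "connected" */
	if (netif_running(dev))		/* already opened by userspace? */
		netif_wake_queue(dev);
	rtnl_unlock();
}
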
void netif_disconnect(netif_t *netif)
{
- switch (netif->status) {
- case CONNECTED:
+ if (netif_carrier_ok(netif->dev)) {
rtnl_lock();
- netif->status = DISCONNECTING;
- wmb();
+ netif_carrier_off(netif->dev);
if (netif_running(netif->dev))
__netif_down(netif);
rtnl_unlock();
netif_put(netif);
- /* fall through */
- case DISCONNECTED:
- netif_free(netif);
- break;
- default:
- BUG();
}
+ netif_free(netif);
}
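
Teardown mirrors bring-up: carrier goes off under rtnl_lock() before the data path is stopped, and the switch collapses to a single test because netif_free() is now called unconditionally. Assuming netif_free() drops the initial reference and waits on the waiting_to_free queue initialized in netif_alloc(), its shape would be roughly:

#include <linux/wait.h>

/* Sketch only; the real netif_free() is not shown in this hunk. */
static void example_free(netif_t *netif)
{
	atomic_dec(&netif->refcnt);
	wait_event(netif->waiting_to_free,
		   atomic_read(&netif->refcnt) == 0);
	free_netdev(netif->dev);
}
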
BUG_ON(skb->dev != dev);
/* Drop the packet if the target domain has no receive buffers. */
- if (!netif->active ||
+ if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev)) ||
(netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
NET_RX_RING_SIZE))
return;
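
The transmit path now drops a packet for any of three reasons: the interface is down or its carrier is off, the frontend has posted no new receive requests, or a full ring's worth of requests is already outstanding. The ring half of that test can be read as a predicate (a restatement of the condition above, not new driver code):

/* Requests are available when the peek cursor trails the frontend's
 * request producer and fewer than NET_RX_RING_SIZE requests are
 * outstanding. */
static int example_rx_ring_has_space(netif_t *netif)
{
	return (netif->rx_req_cons_peek != netif->rx.sring->req_prod) &&
	       ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) <
		NET_RX_RING_SIZE);
}
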
spin_lock_irq(&net_schedule_list_lock);
- if (!__on_net_schedule_list(netif) && netif->active) {
+ if (!__on_net_schedule_list(netif) &&
+ likely(netif_running(netif->dev) &&
+ netif_carrier_ok(netif->dev))) {
list_add_tail(&netif->list, &net_schedule_list);
netif_get(netif);
}
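
Queuing onto net_schedule_list takes a reference via netif_get(), so whichever path later unlinks the entry must drop it. A sketch of the matching removal (example_remove_from_schedule_list() is a hypothetical helper mirroring the locking above):

static void example_remove_from_schedule_list(netif_t *netif)
{
	spin_lock_irq(&net_schedule_list_lock);
	if (__on_net_schedule_list(netif)) {
		list_del_init(&netif->list);
		netif_put(netif);	/* drop the list's reference */
	}
	spin_unlock_irq(&net_schedule_list_lock);
}
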